//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//
12 #include "llvm/CodeGen/GlobalISel/Utils.h"
13 #include "llvm/ADT/APFloat.h"
14 #include "llvm/ADT/APInt.h"
15 #include "llvm/CodeGen/CodeGenCommonISel.h"
16 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
17 #include "llvm/CodeGen/GlobalISel/GISelKnownBits.h"
18 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
19 #include "llvm/CodeGen/GlobalISel/LostDebugLocObserver.h"
20 #include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
21 #include "llvm/CodeGen/MachineInstr.h"
22 #include "llvm/CodeGen/MachineInstrBuilder.h"
23 #include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
24 #include "llvm/CodeGen/MachineRegisterInfo.h"
25 #include "llvm/CodeGen/MachineSizeOpts.h"
26 #include "llvm/CodeGen/RegisterBankInfo.h"
27 #include "llvm/CodeGen/StackProtector.h"
28 #include "llvm/CodeGen/TargetInstrInfo.h"
29 #include "llvm/CodeGen/TargetLowering.h"
30 #include "llvm/CodeGen/TargetPassConfig.h"
31 #include "llvm/CodeGen/TargetRegisterInfo.h"
32 #include "llvm/IR/Constants.h"
33 #include "llvm/Target/TargetMachine.h"
34 #include "llvm/Transforms/Utils/SizeOpts.h"
#define DEBUG_TYPE "globalisel-utils"
using namespace llvm;
using namespace MIPatternMatch;
Register llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, Register Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, MachineOperand &RegMO) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  // Save the old register class to check whether
  // the change notifications will be required.
  // TODO: A better approach would be to pass
  // the observers to constrainRegToClass().
  auto *OldRegClass = MRI.getRegClassOrNull(Reg);
  Register ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    // FIXME: The copy needs to have the classes constrained for its operands.
    // Use operand's regbank to get the class for old register (Reg).
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changingInstr(*RegMO.getParent());
    }
    RegMO.setReg(ConstrainedReg);
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      Observer->changedInstr(*RegMO.getParent());
    }
  } else if (OldRegClass != MRI.getRegClassOrNull(Reg)) {
    if (GISelChangeObserver *Observer = MF.getObserver()) {
      if (!RegMO.isDef()) {
        MachineInstr *RegDef = MRI.getVRegDef(Reg);
        Observer->changedInstr(*RegDef);
      }
      Observer->changingAllUsesOfReg(MRI, Reg);
      Observer->finishedChangingAllUsesOfReg();
    }
  }
  return ConstrainedReg;
}
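
// The effect described above, sketched on hypothetical generic MIR (the vreg
// numbers and the gpr32 class are illustrative, not taken from any particular
// target): when %0 cannot simply be constrained to the required class, the
// def is rewritten to a fresh vreg and a COPY bridges old and new.
//
//   Before:  %0:_(s32) = TARGET_INSTR ...
//   After:   %2:gpr32 = TARGET_INSTR ...
//            %0:_(s32) = COPY %2:gpr32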
Register llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Reg.isVirtual() && "PhysReg not implemented");

  const TargetRegisterClass *OpRC = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // Obtain the RC from incoming regbank if it is a proper sub-class. Operands
  // can have multiple regbanks for a superclass that combine different
  // register types (E.g., AMDGPU's VGPR and AGPR). The regbank ambiguity
  // resolved by targets during regbankselect should not be overridden.
  if (const auto *SubRC = TRI.getCommonSubClass(
          OpRC, TRI.getConstrainedRegClassForOperand(RegMO, MRI)))
    OpRC = SubRC;

  OpRC = TRI.getAllocatableClass(OpRC);

  if (!OpRC) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // copy-like instructions, which cannot be constrained the same way a
    // normal instruction can: a register such as
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *OpRC,
                                  RegMO);
}
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Reg.isPhysical())
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(), MO, OpI);

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
bool llvm::canReplaceReg(Register DstReg, Register SrcReg,
                         MachineRegisterInfo &MRI) {
  // Give up if either DstReg or SrcReg is a physical register.
  if (DstReg.isPhysical() || SrcReg.isPhysical())
    return false;
  // Give up if the types don't match.
  if (MRI.getType(DstReg) != MRI.getType(SrcReg))
    return false;
  // Replace if either DstReg has no constraints or the register
  // constraints match.
  const auto &DstRBC = MRI.getRegClassOrRegBank(DstReg);
  if (!DstRBC || DstRBC == MRI.getRegClassOrRegBank(SrcReg))
    return true;

  // Otherwise match if the Src is already a regclass that is covered by the
  // Dst RegBank.
  return DstRBC.is<const RegisterBank *>() && MRI.getRegClassOrNull(SrcReg) &&
         DstRBC.get<const RegisterBank *>()->covers(
             *MRI.getRegClassOrNull(SrcReg));
}
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // FIXME: This logic is mostly duplicated with
  // DeadMachineInstructionElim::isDead. Why is LOCAL_ESCAPE not considered in
  // MachineInstr::isLabel?

  // Don't delete frame allocation labels.
  if (MI.getOpcode() == TargetOpcode::LOCAL_ESCAPE)
    return false;
  // LIFETIME markers should be preserved even if they seem dead.
  if (MI.getOpcode() == TargetOpcode::LIFETIME_START ||
      MI.getOpcode() == TargetOpcode::LIFETIME_END)
    return false;

  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (const auto &MO : MI.all_defs()) {
    Register Reg = MO.getReg();
    if (Reg.isPhysical() || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}
static void reportGISelDiagnostic(DiagnosticSeverity Severity,
                                  MachineFunction &MF,
                                  const TargetPassConfig &TPC,
                                  MachineOptimizationRemarkEmitter &MORE,
                                  MachineOptimizationRemarkMissed &R) {
  bool IsFatal = Severity == DS_Error && TPC.isGlobalISelAbortEnabled();
  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || IsFatal)
    R << (" (in function: " + MF.getName() + ")").str();

  if (IsFatal)
    report_fatal_error(Twine(R.getMsg()));
  else
    MORE.emit(R);
}
void llvm::reportGISelWarning(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  reportGISelDiagnostic(DS_Warning, MF, TPC, MORE, R);
}
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
  reportGISelDiagnostic(DS_Error, MF, TPC, MORE, R);
}
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}
std::optional<APInt> llvm::getIConstantVRegVal(Register VReg,
                                               const MachineRegisterInfo &MRI,
                                               bool LookThroughInstrs) {
  std::optional<ValueAndVReg> ValAndVReg =
      getIConstantVRegValWithLookThrough(VReg, MRI, LookThroughInstrs);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return std::nullopt;
  return ValAndVReg->Value;
}
std::optional<int64_t>
llvm::getIConstantVRegSExtVal(Register VReg, const MachineRegisterInfo &MRI,
                              bool LookThroughInstrs) {
  std::optional<APInt> Val = getIConstantVRegVal(VReg, MRI, LookThroughInstrs);
  if (Val && Val->getBitWidth() <= 64)
    return Val->getSExtValue();
  return std::nullopt;
}
namespace {

typedef std::function<bool(const MachineInstr *)> IsOpcodeFn;
typedef std::function<std::optional<APInt>(const MachineInstr *MI)> GetAPCstFn;
std::optional<ValueAndVReg> getConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, IsOpcodeFn IsConstantOpcode,
    GetAPCstFn getAPCstValue, bool LookThroughInstrs = true,
    bool LookThroughAnyExt = false) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;

  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_ANYEXT:
      if (!LookThroughAnyExt)
        return std::nullopt;
      [[fallthrough]];
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (VReg.isPhysical())
        return std::nullopt;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return std::nullopt;
    }
  }
  if (!MI || !IsConstantOpcode(MI))
    return std::nullopt;

  std::optional<APInt> MaybeVal = getAPCstValue(MI);
  if (!MaybeVal)
    return std::nullopt;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ANYEXT:
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  return ValueAndVReg{Val, VReg};
}
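
// Worked example of the look-through above, on illustrative generic MIR:
//
//   %c:_(s32) = G_CONSTANT i32 42
//   %t:_(s16) = G_TRUNC %c(s32)
//   %z:_(s64) = G_ZEXT %t(s16)
//
// Querying %z walks the G_ZEXT and G_TRUNC down to the G_CONSTANT, then
// replays the recorded casts in reverse (truncate to 16 bits, zero-extend to
// 64 bits), yielding a 64-bit APInt with value 42 paired with vreg %c.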
bool isIConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_CONSTANT;
}

bool isFConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  return MI->getOpcode() == TargetOpcode::G_FCONSTANT;
}

bool isAnyConstant(const MachineInstr *MI) {
  if (!MI)
    return false;
  unsigned Opc = MI->getOpcode();
  return Opc == TargetOpcode::G_CONSTANT || Opc == TargetOpcode::G_FCONSTANT;
}

std::optional<APInt> getCImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  return std::nullopt;
}

std::optional<APInt> getCImmOrFPImmAsAPInt(const MachineInstr *MI) {
  const MachineOperand &CstVal = MI->getOperand(1);
  if (CstVal.isCImm())
    return CstVal.getCImm()->getValue();
  if (CstVal.isFPImm())
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  return std::nullopt;
}
} // end anonymous namespace
std::optional<ValueAndVReg> llvm::getIConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  return getConstantVRegValWithLookThrough(VReg, MRI, isIConstant,
                                           getCImmAsAPInt, LookThroughInstrs);
}
std::optional<ValueAndVReg> llvm::getAnyConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool LookThroughAnyExt) {
  return getConstantVRegValWithLookThrough(
      VReg, MRI, isAnyConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs,
      LookThroughAnyExt);
}
std::optional<FPValueAndVReg> llvm::getFConstantVRegValWithLookThrough(
    Register VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  auto Reg = getConstantVRegValWithLookThrough(
      VReg, MRI, isFConstant, getCImmOrFPImmAsAPInt, LookThroughInstrs);
  if (!Reg)
    return std::nullopt;
  return FPValueAndVReg{getConstantFPVRegVal(Reg->VReg, MRI)->getValueAPF(),
                        Reg->VReg};
}
const ConstantFP *
llvm::getConstantFPVRegVal(Register VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}
std::optional<DefinitionAndSourceRegister>
llvm::getDefSrcRegIgnoringCopies(Register Reg, const MachineRegisterInfo &MRI) {
  Register DefSrcReg = Reg;
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return std::nullopt;
  unsigned Opc = DefMI->getOpcode();
  while (Opc == TargetOpcode::COPY || isPreISelGenericOptimizationHint(Opc)) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid())
      break;
    DefMI = MRI.getVRegDef(SrcReg);
    DefSrcReg = SrcReg;
    Opc = DefMI->getOpcode();
  }
  return DefinitionAndSourceRegister{DefMI, DefSrcReg};
}
MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->MI : nullptr;
}
Register llvm::getSrcRegIgnoringCopies(Register Reg,
                                       const MachineRegisterInfo &MRI) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  return DefSrcReg ? DefSrcReg->Reg : Register();
}
MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                 const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
std::optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode,
                                             const Register Op1,
                                             const Register Op2,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp2Cst = getAnyConstantVRegValWithLookThrough(Op2, MRI, false);
  if (!MaybeOp2Cst)
    return std::nullopt;

  auto MaybeOp1Cst = getAnyConstantVRegValWithLookThrough(Op1, MRI, false);
  if (!MaybeOp1Cst)
    return std::nullopt;

  const APInt &C1 = MaybeOp1Cst->Value;
  const APInt &C2 = MaybeOp2Cst->Value;
  switch (Opcode) {
  default:
    break;
  case TargetOpcode::G_ADD:
  case TargetOpcode::G_PTR_ADD:
    return C1 + C2;
  case TargetOpcode::G_AND:
    return C1 & C2;
  case TargetOpcode::G_ASHR:
    return C1.ashr(C2);
  case TargetOpcode::G_LSHR:
    return C1.lshr(C2);
  case TargetOpcode::G_MUL:
    return C1 * C2;
  case TargetOpcode::G_OR:
    return C1 | C2;
  case TargetOpcode::G_SHL:
    return C1 << C2;
  case TargetOpcode::G_SUB:
    return C1 - C2;
  case TargetOpcode::G_XOR:
    return C1 ^ C2;
  case TargetOpcode::G_UDIV:
    if (!C2.getBoolValue())
      break;
    return C1.udiv(C2);
  case TargetOpcode::G_SDIV:
    if (!C2.getBoolValue())
      break;
    return C1.sdiv(C2);
  case TargetOpcode::G_UREM:
    if (!C2.getBoolValue())
      break;
    return C1.urem(C2);
  case TargetOpcode::G_SREM:
    if (!C2.getBoolValue())
      break;
    return C1.srem(C2);
  case TargetOpcode::G_SMIN:
    return APIntOps::smin(C1, C2);
  case TargetOpcode::G_SMAX:
    return APIntOps::smax(C1, C2);
  case TargetOpcode::G_UMIN:
    return APIntOps::umin(C1, C2);
  case TargetOpcode::G_UMAX:
    return APIntOps::umax(C1, C2);
  }

  return std::nullopt;
}
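
// A minimal usage sketch, assuming a combine that has a MachineIRBuilder
// named Builder and registers Dst/LHS/RHS in scope (all illustrative names):
//
//   if (std::optional<APInt> Folded =
//           ConstantFoldBinOp(TargetOpcode::G_ADD, LHS, RHS, MRI))
//     Builder.buildConstant(Dst, *Folded);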
std::optional<APFloat>
llvm::ConstantFoldFPBinOp(unsigned Opcode, const Register Op1,
                          const Register Op2, const MachineRegisterInfo &MRI) {
  const ConstantFP *Op2Cst = getConstantFPVRegVal(Op2, MRI);
  if (!Op2Cst)
    return std::nullopt;

  const ConstantFP *Op1Cst = getConstantFPVRegVal(Op1, MRI);
  if (!Op1Cst)
    return std::nullopt;

  APFloat C1 = Op1Cst->getValueAPF();
  const APFloat &C2 = Op2Cst->getValueAPF();
  switch (Opcode) {
  case TargetOpcode::G_FADD:
    C1.add(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FSUB:
    C1.subtract(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FMUL:
    C1.multiply(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FDIV:
    C1.divide(C2, APFloat::rmNearestTiesToEven);
    return C1;
  case TargetOpcode::G_FREM:
    C1.mod(C2);
    return C1;
  case TargetOpcode::G_FCOPYSIGN:
    C1.copySign(C2);
    return C1;
  case TargetOpcode::G_FMINNUM:
    return minnum(C1, C2);
  case TargetOpcode::G_FMAXNUM:
    return maxnum(C1, C2);
  case TargetOpcode::G_FMINIMUM:
    return minimum(C1, C2);
  case TargetOpcode::G_FMAXIMUM:
    return maximum(C1, C2);
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE:
    // FIXME: These operations were unfortunately named. fminnum/fmaxnum do not
    // follow the IEEE behavior for signaling nans and follow libm's fmin/fmax,
    // and currently there isn't a nice wrapper in APFloat for the version with
    // correct snan handling.
    break;
  default:
    break;
  }

  return std::nullopt;
}
SmallVector<APInt>
llvm::ConstantFoldVectorBinop(unsigned Opcode, const Register Op1,
                              const Register Op2,
                              const MachineRegisterInfo &MRI) {
  auto *SrcVec2 = getOpcodeDef<GBuildVector>(Op2, MRI);
  if (!SrcVec2)
    return SmallVector<APInt>();

  auto *SrcVec1 = getOpcodeDef<GBuildVector>(Op1, MRI);
  if (!SrcVec1)
    return SmallVector<APInt>();

  SmallVector<APInt> FoldedElements;
  for (unsigned Idx = 0, E = SrcVec1->getNumSources(); Idx < E; ++Idx) {
    auto MaybeCst = ConstantFoldBinOp(Opcode, SrcVec1->getSourceReg(Idx),
                                      SrcVec2->getSourceReg(Idx), MRI);
    if (!MaybeCst)
      return SmallVector<APInt>();
    FoldedElements.push_back(*MaybeCst);
  }
  return FoldedElements;
}
bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  const TargetMachine &TM = DefMI->getMF()->getTarget();
  if (DefMI->getFlag(MachineInstr::FmNoNans) || TM.Options.NoNaNsFPMath)
    return true;

  // If the value is a constant, we can obviously see if it is a NaN or not.
  if (const ConstantFP *FPVal = getConstantFPVRegVal(Val, MRI)) {
    return !FPVal->getValueAPF().isNaN() ||
           (SNaN && !FPVal->getValueAPF().isSignaling());
  }

  if (DefMI->getOpcode() == TargetOpcode::G_BUILD_VECTOR) {
    for (const auto &Op : DefMI->uses())
      if (!isKnownNeverNaN(Op.getReg(), MRI, SNaN))
        return false;
    return true;
  }

  switch (DefMI->getOpcode()) {
  default:
    break;
  case TargetOpcode::G_FADD:
  case TargetOpcode::G_FSUB:
  case TargetOpcode::G_FMUL:
  case TargetOpcode::G_FDIV:
  case TargetOpcode::G_FREM:
  case TargetOpcode::G_FSIN:
  case TargetOpcode::G_FCOS:
  case TargetOpcode::G_FMA:
  case TargetOpcode::G_FMAD:
    if (SNaN)
      return true;

    // TODO: Need isKnownNeverInfinity
    return false;
  case TargetOpcode::G_FMINNUM_IEEE:
  case TargetOpcode::G_FMAXNUM_IEEE: {
    if (SNaN)
      return true;
    // This can return a NaN if either operand is an sNaN, or if both operands
    // are NaN.
    return (isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverSNaN(DefMI->getOperand(2).getReg(), MRI)) ||
           (isKnownNeverSNaN(DefMI->getOperand(1).getReg(), MRI) &&
            isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI));
  }
  case TargetOpcode::G_FMINNUM:
  case TargetOpcode::G_FMAXNUM: {
    // Only one needs to be known not-nan, since it will be returned if the
    // other ends up being one.
    return isKnownNeverNaN(DefMI->getOperand(1).getReg(), MRI, SNaN) ||
           isKnownNeverNaN(DefMI->getOperand(2).getReg(), MRI, SNaN);
  }
  }

  if (SNaN) {
    // FP operations quiet. For now, just handle the ones inserted during
    // legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}
Align llvm::inferAlignFromPtrInfo(MachineFunction &MF,
                                  const MachinePointerInfo &MPO) {
  auto PSV = dyn_cast_if_present<const PseudoSourceValue *>(MPO.V);
  if (auto FSPV = dyn_cast_or_null<FixedStackPseudoSourceValue>(PSV)) {
    MachineFrameInfo &MFI = MF.getFrameInfo();
    return commonAlignment(MFI.getObjectAlign(FSPV->getFrameIndex()),
                           MPO.Offset);
  }

  if (const Value *V = dyn_cast_if_present<const Value *>(MPO.V)) {
    const Module *M = MF.getFunction().getParent();
    return V->getPointerAlignment(M->getDataLayout());
  }

  return Align(1);
}
Register llvm::getFunctionLiveInPhysReg(MachineFunction &MF,
                                        const TargetInstrInfo &TII,
                                        MCRegister PhysReg,
                                        const TargetRegisterClass &RC,
                                        const DebugLoc &DL, LLT RegTy) {
  MachineBasicBlock &EntryMBB = MF.front();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  Register LiveIn = MRI.getLiveInVirtReg(PhysReg);
  if (LiveIn) {
    MachineInstr *Def = MRI.getVRegDef(LiveIn);
    if (Def) {
      // FIXME: Should the verifier check this is in the entry block?
      assert(Def->getParent() == &EntryMBB && "live-in copy not in entry block");
      return LiveIn;
    }

    // It's possible the incoming argument register and copy was added during
    // lowering, but later deleted due to being/becoming dead. If this happens,
    // re-insert the copy.
  } else {
    // The live in register was not present, so add it.
    LiveIn = MF.addLiveIn(PhysReg, &RC);
    if (RegTy.isValid())
      MRI.setType(LiveIn, RegTy);
  }

  BuildMI(EntryMBB, EntryMBB.begin(), DL, TII.get(TargetOpcode::COPY), LiveIn)
      .addReg(PhysReg);
  if (!EntryMBB.isLiveIn(PhysReg))
    EntryMBB.addLiveIn(PhysReg);
  return LiveIn;
}
std::optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode,
                                             const Register Op1, uint64_t Imm,
                                             const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getIConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG: {
      LLT Ty = MRI.getType(Op1);
      return MaybeOp1Cst->trunc(Imm).sext(Ty.getScalarSizeInBits());
    }
    }
  }
  return std::nullopt;
}
std::optional<APInt> llvm::ConstantFoldCastOp(unsigned Opcode, LLT DstTy,
                                              const Register Op0,
                                              const MachineRegisterInfo &MRI) {
  std::optional<APInt> Val = getIConstantVRegVal(Op0, MRI);
  if (!Val)
    return Val;

  const unsigned DstSize = DstTy.getScalarSizeInBits();

  switch (Opcode) {
  case TargetOpcode::G_SEXT:
    return Val->sext(DstSize);
  case TargetOpcode::G_ZEXT:
  case TargetOpcode::G_ANYEXT:
    // TODO: DAG considers target preference when constant folding any_extend.
    return Val->zext(DstSize);
  default:
    break;
  }

  llvm_unreachable("unexpected cast opcode to constant fold");
}
std::optional<APFloat>
llvm::ConstantFoldIntToFloat(unsigned Opcode, LLT DstTy, Register Src,
                             const MachineRegisterInfo &MRI) {
  assert(Opcode == TargetOpcode::G_SITOFP || Opcode == TargetOpcode::G_UITOFP);
  if (auto MaybeSrcVal = getIConstantVRegVal(Src, MRI)) {
    APFloat DstVal(getFltSemanticForLLT(DstTy));
    DstVal.convertFromAPInt(*MaybeSrcVal, Opcode == TargetOpcode::G_SITOFP,
                            APFloat::rmNearestTiesToEven);
    return DstVal;
  }
  return std::nullopt;
}
<SmallVector
<unsigned>>
822 llvm::ConstantFoldCTLZ(Register Src
, const MachineRegisterInfo
&MRI
) {
823 LLT Ty
= MRI
.getType(Src
);
824 SmallVector
<unsigned> FoldedCTLZs
;
825 auto tryFoldScalar
= [&](Register R
) -> std::optional
<unsigned> {
826 auto MaybeCst
= getIConstantVRegVal(R
, MRI
);
829 return MaybeCst
->countl_zero();
832 // Try to constant fold each element.
833 auto *BV
= getOpcodeDef
<GBuildVector
>(Src
, MRI
);
836 for (unsigned SrcIdx
= 0; SrcIdx
< BV
->getNumSources(); ++SrcIdx
) {
837 if (auto MaybeFold
= tryFoldScalar(BV
->getSourceReg(SrcIdx
))) {
838 FoldedCTLZs
.emplace_back(*MaybeFold
);
845 if (auto MaybeCst
= tryFoldScalar(Src
)) {
846 FoldedCTLZs
.emplace_back(*MaybeCst
);
bool llvm::isKnownToBeAPowerOfTwo(Register Reg, const MachineRegisterInfo &MRI,
                                  GISelKnownBits *KB) {
  std::optional<DefinitionAndSourceRegister> DefSrcReg =
      getDefSrcRegIgnoringCopies(Reg, MRI);
  if (!DefSrcReg)
    return false;

  const MachineInstr &MI = *DefSrcReg->MI;
  const LLT Ty = MRI.getType(Reg);

  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT: {
    unsigned BitWidth = Ty.getScalarSizeInBits();
    const ConstantInt *CI = MI.getOperand(1).getCImm();
    return CI->getValue().zextOrTrunc(BitWidth).isPowerOf2();
  }
  case TargetOpcode::G_SHL: {
    // A left-shift of a constant one will have exactly one bit set because
    // shifting the bit off the end is undefined.

    // TODO: Constant splat
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (*ConstLHS == 1)
        return true;
    }

    break;
  }
  case TargetOpcode::G_LSHR: {
    if (auto ConstLHS = getIConstantVRegVal(MI.getOperand(1).getReg(), MRI)) {
      if (ConstLHS->isSignMask())
        return true;
    }

    break;
  }
  case TargetOpcode::G_BUILD_VECTOR: {
    // TODO: Probably should have a recursion depth guard since you could have
    // bitcasted vector elements.
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands()))
      if (!isKnownToBeAPowerOfTwo(MO.getReg(), MRI, KB))
        return false;

    return true;
  }
  case TargetOpcode::G_BUILD_VECTOR_TRUNC: {
    // Only handle constants since we would need to know if number of leading
    // zeros is greater than the truncation amount.
    const unsigned BitWidth = Ty.getScalarSizeInBits();
    for (const MachineOperand &MO : llvm::drop_begin(MI.operands())) {
      auto Const = getIConstantVRegVal(MO.getReg(), MRI);
      if (!Const || !Const->zextOrTrunc(BitWidth).isPowerOf2())
        return false;
    }

    return true;
  }
  default:
    break;
  }

  if (!KB)
    return false;

  // More could be done here, though the above checks are enough
  // to handle some common cases.

  // Fall back to computeKnownBits to catch other known cases.
  KnownBits Known = KB->getKnownBits(Reg);
  return (Known.countMaxPopulation() == 1) && (Known.countMinPopulation() == 1);
}
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}
LLT llvm::getLCMType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    const LLT OrigElt = OrigTy.getElementType();

    if (TargetTy.isVector()) {
      const LLT TargetElt = TargetTy.getElementType();

      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCDElts =
            std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
        // Prefer the original element type.
        ElementCount Mul = OrigTy.getElementCount() * TargetTy.getNumElements();
        return LLT::vector(Mul.divideCoefficientBy(GCDElts),
                           OrigTy.getElementType());
      }
    } else {
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigTy;
    }

    unsigned LCMSize = std::lcm(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    unsigned LCMSize = std::lcm(OrigSize, TargetSize);
    return LLT::fixed_vector(LCMSize / OrigSize, OrigTy);
  }

  unsigned LCMSize = std::lcm(OrigSize, TargetSize);

  // Preserve pointer types.
  if (LCMSize == OrigSize)
    return OrigTy;
  if (LCMSize == TargetSize)
    return TargetTy;

  return LLT::scalar(LCMSize);
}
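
// Worked examples; the results follow directly from the rules above:
//   getLCMType(s32, s64)             -> s64
//   getLCMType(<2 x s32>, <3 x s32>) -> <6 x s32> (element type preserved)
//   getLCMType(s32, <2 x s32>)       -> <2 x s32>
//   getLCMType(p0, s32)              -> p0, assuming 64-bit pointers, since
//                                       the LCM already equals the original
//                                       size and pointer types are preserved.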
LLT llvm::getCoverTy(LLT OrigTy, LLT TargetTy) {
  if (!OrigTy.isVector() || !TargetTy.isVector() || OrigTy == TargetTy ||
      (OrigTy.getScalarSizeInBits() != TargetTy.getScalarSizeInBits()))
    return getLCMType(OrigTy, TargetTy);

  unsigned OrigTyNumElts = OrigTy.getNumElements();
  unsigned TargetTyNumElts = TargetTy.getNumElements();
  if (OrigTyNumElts % TargetTyNumElts == 0)
    return OrigTy;

  unsigned NumElts = alignTo(OrigTyNumElts, TargetTyNumElts);
  return LLT::scalarOrVector(ElementCount::getFixed(NumElts),
                             OrigTy.getElementType());
}
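
// Worked example: covering <3 x s32> with <2 x s32> chunks requires rounding
// the element count up to a multiple of the target's, so
// getCoverTy(<3 x s32>, <2 x s32>) -> <4 x s32>.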
LLT llvm::getGCDType(LLT OrigTy, LLT TargetTy) {
  const unsigned OrigSize = OrigTy.getSizeInBits();
  const unsigned TargetSize = TargetTy.getSizeInBits();

  if (OrigSize == TargetSize)
    return OrigTy;

  if (OrigTy.isVector()) {
    LLT OrigElt = OrigTy.getElementType();
    if (TargetTy.isVector()) {
      LLT TargetElt = TargetTy.getElementType();
      if (OrigElt.getSizeInBits() == TargetElt.getSizeInBits()) {
        int GCD = std::gcd(OrigTy.getNumElements(), TargetTy.getNumElements());
        return LLT::scalarOrVector(ElementCount::getFixed(GCD), OrigElt);
      }
    } else {
      // If the source is a vector of pointers, return a pointer element.
      if (OrigElt.getSizeInBits() == TargetSize)
        return OrigElt;
    }

    unsigned GCD = std::gcd(OrigSize, TargetSize);
    if (GCD == OrigElt.getSizeInBits())
      return OrigElt;

    // If we can't produce the original element type, we have to use a smaller
    // scalar.
    if (GCD < OrigElt.getSizeInBits())
      return LLT::scalar(GCD);
    return LLT::fixed_vector(GCD / OrigElt.getSizeInBits(), OrigElt);
  }

  if (TargetTy.isVector()) {
    // Try to preserve the original element type.
    LLT TargetElt = TargetTy.getElementType();
    if (TargetElt.getSizeInBits() == OrigSize)
      return OrigTy;
  }

  unsigned GCD = std::gcd(OrigSize, TargetSize);
  return LLT::scalar(GCD);
}
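
// Worked examples; the results follow directly from the rules above:
//   getGCDType(s64, s32)             -> s32
//   getGCDType(<4 x s32>, <2 x s32>) -> <2 x s32> (GCD of the element counts)
//   getGCDType(<3 x s32>, <2 x s32>) -> s32       (element counts are coprime)
//   getGCDType(<4 x s16>, s32)       -> <2 x s16> (32 bits expressed in the
//                                                  original element type)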
std::optional<int> llvm::getSplatIndex(MachineInstr &MI) {
  assert(MI.getOpcode() == TargetOpcode::G_SHUFFLE_VECTOR &&
         "Only G_SHUFFLE_VECTOR can have a splat index!");
  ArrayRef<int> Mask = MI.getOperand(3).getShuffleMask();
  auto FirstDefinedIdx = find_if(Mask, [](int Elt) { return Elt >= 0; });

  // If all elements are undefined, this shuffle can be considered a splat.
  // Return 0 for better potential for callers to simplify.
  if (FirstDefinedIdx == Mask.end())
    return 0;

  // Make sure all remaining elements are either undef or the same
  // as the first non-undef value.
  int SplatValue = *FirstDefinedIdx;
  if (any_of(make_range(std::next(FirstDefinedIdx), Mask.end()),
             [&SplatValue](int Elt) { return Elt >= 0 && Elt != SplatValue; }))
    return std::nullopt;

  return SplatValue;
}
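
// For example, a shuffle mask of <1, -1, 1, 1> reports a splat index of 1
// (undef lanes are ignored), an all-undef mask reports 0, and a mask such as
// <0, 1, 0, 0> reports std::nullopt.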
static bool isBuildVectorOp(unsigned Opcode) {
  return Opcode == TargetOpcode::G_BUILD_VECTOR ||
         Opcode == TargetOpcode::G_BUILD_VECTOR_TRUNC;
}
namespace {

std::optional<ValueAndVReg> getAnyConstantSplat(Register VReg,
                                                const MachineRegisterInfo &MRI,
                                                bool AllowUndef) {
  MachineInstr *MI = getDefIgnoringCopies(VReg, MRI);
  if (!MI)
    return std::nullopt;

  bool isConcatVectorsOp = MI->getOpcode() == TargetOpcode::G_CONCAT_VECTORS;
  if (!isBuildVectorOp(MI->getOpcode()) && !isConcatVectorsOp)
    return std::nullopt;

  std::optional<ValueAndVReg> SplatValAndReg;
  for (MachineOperand &Op : MI->uses()) {
    Register Element = Op.getReg();
    // If we have a G_CONCAT_VECTOR, we recursively look into the
    // vectors that we're concatenating to see if they're splats.
    auto ElementValAndReg =
        isConcatVectorsOp
            ? getAnyConstantSplat(Element, MRI, AllowUndef)
            : getAnyConstantVRegValWithLookThrough(Element, MRI, true, true);

    // If AllowUndef, treat undef as value that will result in a constant
    // splat.
    if (!ElementValAndReg) {
      if (AllowUndef && isa<GImplicitDef>(MRI.getVRegDef(Element)))
        continue;
      return std::nullopt;
    }

    // Record splat value
    if (!SplatValAndReg)
      SplatValAndReg = ElementValAndReg;

    // Different constant than the one already recorded, not a constant splat.
    if (SplatValAndReg->Value != ElementValAndReg->Value)
      return std::nullopt;
  }

  return SplatValAndReg;
}

} // end anonymous namespace
bool llvm::isBuildVectorConstantSplat(const Register Reg,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(Reg, MRI, AllowUndef))
    return mi_match(SplatValAndReg->VReg, MRI, m_SpecificICst(SplatValue));
  return false;
}
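
// Illustrative generic MIR (the vreg names are hypothetical): with AllowUndef
// set, the implicit-def lane does not break the splat, so the vector below is
// reported as a constant splat of 7.
//
//   %c:_(s32) = G_CONSTANT i32 7
//   %u:_(s32) = G_IMPLICIT_DEF
//   %v:_(<4 x s32>) = G_BUILD_VECTOR %c(s32), %c(s32), %u(s32), %c(s32)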
bool llvm::isBuildVectorConstantSplat(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      int64_t SplatValue, bool AllowUndef) {
  return isBuildVectorConstantSplat(MI.getOperand(0).getReg(), MRI, SplatValue,
                                    AllowUndef);
}
std::optional<APInt>
llvm::getIConstantSplatVal(const Register Reg, const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false)) {
    std::optional<ValueAndVReg> ValAndVReg =
        getIConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
    return ValAndVReg->Value;
  }

  return std::nullopt;
}
std::optional<APInt>
llvm::getIConstantSplatVal(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  return getIConstantSplatVal(MI.getOperand(0).getReg(), MRI);
}
std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const Register Reg,
                               const MachineRegisterInfo &MRI) {
  if (auto SplatValAndReg =
          getAnyConstantSplat(Reg, MRI, /* AllowUndef */ false))
    return getIConstantVRegSExtVal(SplatValAndReg->VReg, MRI);
  return std::nullopt;
}
std::optional<int64_t>
llvm::getIConstantSplatSExtVal(const MachineInstr &MI,
                               const MachineRegisterInfo &MRI) {
  return getIConstantSplatSExtVal(MI.getOperand(0).getReg(), MRI);
}
std::optional<FPValueAndVReg>
llvm::getFConstantSplat(Register VReg, const MachineRegisterInfo &MRI,
                        bool AllowUndef) {
  if (auto SplatValAndReg = getAnyConstantSplat(VReg, MRI, AllowUndef))
    return getFConstantVRegValWithLookThrough(SplatValAndReg->VReg, MRI);
  return std::nullopt;
}
bool llvm::isBuildVectorAllZeros(const MachineInstr &MI,
                                 const MachineRegisterInfo &MRI,
                                 bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, 0, AllowUndef);
}
bool llvm::isBuildVectorAllOnes(const MachineInstr &MI,
                                const MachineRegisterInfo &MRI,
                                bool AllowUndef) {
  return isBuildVectorConstantSplat(MI, MRI, -1, AllowUndef);
}
std::optional<RegOrConstant>
llvm::getVectorSplat(const MachineInstr &MI, const MachineRegisterInfo &MRI) {
  unsigned Opc = MI.getOpcode();
  if (!isBuildVectorOp(Opc))
    return std::nullopt;
  if (auto Splat = getIConstantSplatSExtVal(MI, MRI))
    return RegOrConstant(*Splat);
  auto Reg = MI.getOperand(1).getReg();
  if (any_of(drop_begin(MI.operands(), 2),
             [&Reg](const MachineOperand &Op) { return Op.getReg() != Reg; }))
    return std::nullopt;
  return RegOrConstant(Reg);
}
static bool isConstantScalar(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI,
                             bool AllowFP = true,
                             bool AllowOpaqueConstants = true) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_CONSTANT:
  case TargetOpcode::G_IMPLICIT_DEF:
    return true;
  case TargetOpcode::G_FCONSTANT:
    return AllowFP;
  case TargetOpcode::G_GLOBAL_VALUE:
  case TargetOpcode::G_FRAME_INDEX:
  case TargetOpcode::G_BLOCK_ADDR:
  case TargetOpcode::G_JUMP_TABLE:
    return AllowOpaqueConstants;
  default:
    return false;
  }
}
bool llvm::isConstantOrConstantVector(MachineInstr &MI,
                                      const MachineRegisterInfo &MRI) {
  Register Def = MI.getOperand(0).getReg();
  if (auto C = getIConstantVRegValWithLookThrough(Def, MRI))
    return true;
  GBuildVector *BV = dyn_cast<GBuildVector>(&MI);
  if (!BV)
    return false;
  for (unsigned SrcIdx = 0; SrcIdx < BV->getNumSources(); ++SrcIdx) {
    if (getIConstantVRegValWithLookThrough(BV->getSourceReg(SrcIdx), MRI) ||
        getOpcodeDef<GImplicitDef>(BV->getSourceReg(SrcIdx), MRI))
      continue;
    return false;
  }
  return true;
}
bool llvm::isConstantOrConstantVector(const MachineInstr &MI,
                                      const MachineRegisterInfo &MRI,
                                      bool AllowFP, bool AllowOpaqueConstants) {
  if (isConstantScalar(MI, MRI, AllowFP, AllowOpaqueConstants))
    return true;

  if (!isBuildVectorOp(MI.getOpcode()))
    return false;

  const unsigned NumOps = MI.getNumOperands();
  for (unsigned I = 1; I != NumOps; ++I) {
    const MachineInstr *ElementDef = MRI.getVRegDef(MI.getOperand(I).getReg());
    if (!isConstantScalar(*ElementDef, MRI, AllowFP, AllowOpaqueConstants))
      return false;
  }

  return true;
}
<APInt
>
1241 llvm::isConstantOrConstantSplatVector(MachineInstr
&MI
,
1242 const MachineRegisterInfo
&MRI
) {
1243 Register Def
= MI
.getOperand(0).getReg();
1244 if (auto C
= getIConstantVRegValWithLookThrough(Def
, MRI
))
1246 auto MaybeCst
= getIConstantSplatSExtVal(MI
, MRI
);
1248 return std::nullopt
;
1249 const unsigned ScalarSize
= MRI
.getType(Def
).getScalarSizeInBits();
1250 return APInt(ScalarSize
, *MaybeCst
, true);
bool llvm::isNullOrNullSplat(const MachineInstr &MI,
                             const MachineRegisterInfo &MRI, bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isNullValue();
  case TargetOpcode::G_FCONSTANT: {
    const ConstantFP *FPImm = MI.getOperand(1).getFPImm();
    return FPImm->isZero() && !FPImm->isNegative();
  }
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllZeros assumes undef is OK already
      return false;
    return isBuildVectorAllZeros(MI, MRI);
  }
}
bool llvm::isAllOnesOrAllOnesSplat(const MachineInstr &MI,
                                   const MachineRegisterInfo &MRI,
                                   bool AllowUndefs) {
  switch (MI.getOpcode()) {
  case TargetOpcode::G_IMPLICIT_DEF:
    return AllowUndefs;
  case TargetOpcode::G_CONSTANT:
    return MI.getOperand(1).getCImm()->isAllOnesValue();
  default:
    if (!AllowUndefs) // TODO: isBuildVectorAllOnes assumes undef is OK already
      return false;
    return isBuildVectorAllOnes(MI, MRI);
  }
}
bool llvm::matchUnaryPredicate(
    const MachineRegisterInfo &MRI, Register Reg,
    std::function<bool(const Constant *ConstVal)> Match, bool AllowUndefs) {

  const MachineInstr *Def = getDefIgnoringCopies(Reg, MRI);
  if (AllowUndefs && Def->getOpcode() == TargetOpcode::G_IMPLICIT_DEF)
    return Match(nullptr);

  // TODO: Also handle fconstant
  if (Def->getOpcode() == TargetOpcode::G_CONSTANT)
    return Match(Def->getOperand(1).getCImm());

  if (Def->getOpcode() != TargetOpcode::G_BUILD_VECTOR)
    return false;

  for (unsigned I = 1, E = Def->getNumOperands(); I != E; ++I) {
    Register SrcElt = Def->getOperand(I).getReg();
    const MachineInstr *SrcDef = getDefIgnoringCopies(SrcElt, MRI);
    if (AllowUndefs && SrcDef->getOpcode() == TargetOpcode::G_IMPLICIT_DEF) {
      if (!Match(nullptr))
        return false;
      continue;
    }

    if (SrcDef->getOpcode() != TargetOpcode::G_CONSTANT ||
        !Match(SrcDef->getOperand(1).getCImm()))
      return false;
  }

  return true;
}
&TLI
, int64_t Val
, bool IsVector
,
1320 switch (TLI
.getBooleanContents(IsVector
, IsFP
)) {
1321 case TargetLowering::UndefinedBooleanContent
:
1323 case TargetLowering::ZeroOrOneBooleanContent
:
1325 case TargetLowering::ZeroOrNegativeOneBooleanContent
:
1328 llvm_unreachable("Invalid boolean contents");
bool llvm::isConstFalseVal(const TargetLowering &TLI, int64_t Val,
                           bool IsVector, bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
    return ~Val & 0x1;
  case TargetLowering::ZeroOrOneBooleanContent:
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return Val == 0;
  }
  llvm_unreachable("Invalid boolean contents");
}
int64_t llvm::getICmpTrueVal(const TargetLowering &TLI, bool IsVector,
                             bool IsFP) {
  switch (TLI.getBooleanContents(IsVector, IsFP)) {
  case TargetLowering::UndefinedBooleanContent:
  case TargetLowering::ZeroOrOneBooleanContent:
    return 1;
  case TargetLowering::ZeroOrNegativeOneBooleanContent:
    return -1;
  }
  llvm_unreachable("Invalid boolean contents");
}
bool llvm::shouldOptForSize(const MachineBasicBlock &MBB,
                            ProfileSummaryInfo *PSI, BlockFrequencyInfo *BFI) {
  const auto &F = MBB.getParent()->getFunction();
  return F.hasOptSize() || F.hasMinSize() ||
         llvm::shouldOptimizeForSize(MBB.getBasicBlock(), PSI, BFI);
}
void llvm::saveUsesAndErase(MachineInstr &MI, MachineRegisterInfo &MRI,
                            LostDebugLocObserver *LocObserver,
                            SmallInstListTy &DeadInstChain) {
  for (MachineOperand &Op : MI.uses()) {
    if (Op.isReg() && Op.getReg().isVirtual())
      DeadInstChain.insert(MRI.getVRegDef(Op.getReg()));
  }
  LLVM_DEBUG(dbgs() << MI << "Is dead; erasing.\n");
  DeadInstChain.remove(&MI);
  MI.eraseFromParent();
  if (LocObserver)
    LocObserver->checkpoint(false);
}
void llvm::eraseInstrs(ArrayRef<MachineInstr *> DeadInstrs,
                       MachineRegisterInfo &MRI,
                       LostDebugLocObserver *LocObserver) {
  SmallInstListTy DeadInstChain;
  for (MachineInstr *MI : DeadInstrs)
    saveUsesAndErase(*MI, MRI, LocObserver, DeadInstChain);

  while (!DeadInstChain.empty()) {
    MachineInstr *Inst = DeadInstChain.pop_back_val();
    if (!isTriviallyDead(*Inst, MRI))
      continue;
    saveUsesAndErase(*Inst, MRI, LocObserver, DeadInstChain);
  }
}
void llvm::eraseInstr(MachineInstr &MI, MachineRegisterInfo &MRI,
                      LostDebugLocObserver *LocObserver) {
  return eraseInstrs({&MI}, MRI, LocObserver);
}
void llvm::salvageDebugInfo(const MachineRegisterInfo &MRI, MachineInstr &MI) {
  for (auto &Def : MI.defs()) {
    assert(Def.isReg() && "Must be a reg");

    SmallVector<MachineOperand *, 16> DbgUsers;
    for (auto &MOUse : MRI.use_operands(Def.getReg())) {
      MachineInstr *DbgValue = MOUse.getParent();
      // Ignore partially formed DBG_VALUEs.
      if (DbgValue->isNonListDebugValue() && DbgValue->getNumOperands() == 4) {
        DbgUsers.push_back(&MOUse);
      }
    }

    if (!DbgUsers.empty()) {
      salvageDebugInfoForDbgValue(MRI, MI, DbgUsers);
    }
  }
}