//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;

unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, unsigned Reg,
                                   const TargetRegisterClass &RegClass) {
  // If the generic register can't be constrained in place, create a fresh
  // virtual register of the requested class instead.
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}

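// Illustrative usage (not part of the original file; the register class name
// is a hypothetical target example):
//
//   unsigned NewReg =
//       constrainRegToClass(MRI, TII, RBI, Reg, ExampleTarget::GPR32RegClass);
//   // NewReg == Reg if Reg could be constrained in place; otherwise NewReg is
//   // a fresh vreg of the class and the caller must connect it with a COPY.
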
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, const MachineOperand &RegMO,
    unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  unsigned ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
  }
  return ConstrainedReg;
}

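// Illustrative effect (a sketch, not from the original source): when %0 cannot
// be constrained in place, a use operand gets a COPY *before* InsertPt,
//
//   %new:regclass = COPY %0
//   ... InsertPt uses %new ...
//
// while a def operand gets the COPY *after* InsertPt, copying the returned
// constrained vreg back into the original one.
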
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: If it's a use, we can
  // skip constraining as the instruction defining the register would constrain
  // it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here could be not enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Please notice that to
    // reproduce the issue we likely need a destination pattern of a selection
    // rule producing such extra copies, not just an input GMIR with them as
    // every existing target using selectImpl handles copies before calling it
    // and they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO, OpIdx);
}

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

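// Illustrative caller (assumed, not from this file): a target's
// InstructionSelector typically mutates a generic instruction to a target
// opcode and then lets this helper fix up the register classes:
//
//   I.setDesc(TII.get(ExampleTarget::ADDWrr)); // hypothetical target opcode
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
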
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // If we can move an instruction, we can remove it.  Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

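// Illustrative usage (an assumption about callers, not from this file):
// combiners and the legalizer use this check to erase instructions whose
// results are no longer referenced:
//
//   if (isTriviallyDead(MI, MRI))
//     MI.eraseFromParent();
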
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location (which
  // makes the diagnostic less useful) or if we're going to emit a raw error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

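// Illustrative call site (a sketch; the pass name string is hypothetical):
//
//   reportGISelFailure(MF, TPC, MORE, "instruction-select",
//                      "cannot select", I);
//
// With -global-isel-abort=1 this ends in report_fatal_error; otherwise it
// marks the function FailedISel and emits a missed-optimization remark.
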
Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
                                           const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

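// Illustrative usage (not from this file): fold against a constant operand,
// e.g. recognizing an add of zero:
//
//   if (auto Cst = getConstantVRegVal(MI.getOperand(2).getReg(), MRI))
//     if (*Cst == 0)
//       ; // treat MI as a copy of its first source
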
Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    unsigned VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  while ((MI = MRI.getVRegDef(VReg)) &&
         MI->getOpcode() != TargetOpcode::G_CONSTANT && LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT ||
      (!MI->getOperand(1).isImm() && !MI->getOperand(1).isCImm()))
    return None;

  const MachineOperand &CstVal = MI->getOperand(1);
  unsigned BitWidth = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
  APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                             : CstVal.getCImm()->getValue();
  assert(Val.getBitWidth() == BitWidth &&
         "Value bitwidth doesn't match definition type");
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  if (Val.getBitWidth() > 64)
    return None;

  return ValueAndVReg{Val.getSExtValue(), VReg};
}

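// Illustrative look-through (a sketch): for the GMIR
//
//   %0:_(s64) = G_CONSTANT i64 300
//   %1:_(s8) = G_TRUNC %0(s64)
//
// getConstantVRegValWithLookThrough(%1, MRI, true) records the G_TRUNC,
// reaches the G_CONSTANT, and re-applies the truncation at the 8-bit width,
// returning {Value: 44, VReg: %0} since 300 wraps to 44 in 8 bits.
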
const llvm::ConstantFP *llvm::getConstantFPVRegVal(unsigned VReg,
                                                   const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return nullptr;
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid() || SrcTy != DstTy)
      break;
    DefMI = MRI.getVRegDef(SrcReg);
  }
  return DefMI;
}

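// Illustrative example (not from this file): for
//
//   %0:_(s32) = G_ADD %a, %b
//   %1:_(s32) = COPY %0(s32)
//
// getDefIgnoringCopies(%1, MRI) walks the same-type COPY and returns the
// G_ADD; a COPY whose source type differs (or is invalid) stops the walk.
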
llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                       const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

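// Illustrative usage (an assumption about callers): match a specific defining
// opcode through copies, e.g. to detect a floating-point constant feeding a
// use:
//
//   if (MachineInstr *FCst =
//           getOpcodeDef(TargetOpcode::G_FCONSTANT, SrcReg, MRI))
//     ; // fold using FCst->getOperand(1).getFPImm()
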
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

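// Worked example (illustrative): getAPFloatFromSize(1.0, 16) first builds a
// double APFloat, then converts it to IEEE half with round-to-nearest-even,
// yielding the half bit pattern 0x3C00; sizes 32 and 64 construct the float
// and double representations directly.
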
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}

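// Worked example (illustrative): with %1 = G_CONSTANT i32 6 and
// %2 = G_CONSTANT i32 -2, ConstantFoldBinOp(TargetOpcode::G_SDIV, %1, %2, MRI)
// yields an APInt holding -3, while a zero divisor falls through the division
// cases' break and returns None instead of folding.
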
bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  if (DefMI->getFlag(MachineInstr::FmNoNans))
    return true;

  if (SNaN) {
    // FP operations quiet. For now, just handle the ones inserted during
    // legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

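// Illustrative behavior (a sketch): a def carrying the nnan fast-math flag
// answers true immediately; when SNaN is set, G_FPEXT, G_FPTRUNC and
// G_FCANONICALIZE also answer true, because these operations quiet any
// signaling NaN input.
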
Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const unsigned Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG:
      return C1.trunc(Imm).sext(C1.getBitWidth());
    }
  }
  return None;
}

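// Worked example (illustrative): with %0 = G_CONSTANT i32 255,
// ConstantFoldExtOp(TargetOpcode::G_SEXT_INREG, %0, 8, MRI) truncates to the
// low 8 bits (0xFF, i.e. -1) and sign-extends back to 32 bits, producing
// 0xFFFFFFFF.
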
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}

MVT llvm::getMVTForLLT(LLT Ty) {
  if (!Ty.isVector())
    return MVT::getIntegerVT(Ty.getSizeInBits());

  return MVT::getVectorVT(
      MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
      Ty.getNumElements());
}

LLT llvm::getLLTForMVT(MVT Ty) {
  if (!Ty.isVector())
    return LLT::scalar(Ty.getSizeInBits());

  return LLT::vector(Ty.getVectorNumElements(),
                     Ty.getVectorElementType().getSizeInBits());
}
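
// Example mappings (illustrative): getMVTForLLT(LLT::scalar(32)) == MVT::i32
// and getMVTForLLT(LLT::vector(4, 16)) == MVT::v4i16, while getLLTForMVT maps
// MVT::f64 to LLT::scalar(64); both directions drop float-ness because LLT
// only tracks size and vector shape.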