//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;

unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, unsigned Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}
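
// Note: when Reg cannot be constrained in place, the caller receives a fresh
// vreg of RegClass and is responsible for stitching in a COPY between the two;
// the constrainOperandRegClass overloads below do exactly that.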

unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, const MachineOperand &RegMO,
    unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  unsigned ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible,
  // then create a copy between the new and the old register.
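  // A sketch of the rewrite for a use operand (vreg names hypothetical):
  //   %new:RegClass = COPY %old
  //   InsertPt ..., %new, ...
  // For a def, the COPY is placed right after InsertPt instead, copying the
  // freshly created vreg back into the original one (hence the
  // std::next(InsertIt) below).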
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
  }
  return ConstrainedReg;
}

unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  unsigned Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(TargetRegisterInfo::isVirtualRegister(Reg) &&
         "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here may not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO, OpIdx);
}
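
// Typical call site (a sketch; see the per-operand loop in
// constrainSelectedInstRegOperands below, which passes the instruction's own
// MCInstrDesc so the register class constraint is read from the target's
// instruction description):
//   MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
//                                      MO, OpI));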

bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    unsigned Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (TRI.isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
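    // (E.g., a sketch: a two-address target instruction whose MCInstrDesc
    // marks a source operand as TIED_TO its def; no specific target assumed.)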
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}

bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;
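
    // Note that use_nodbg_empty ignores debug uses, so a vreg referenced only
    // by DBG_VALUE instructions still counts as dead here.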
    unsigned Reg = MO.getReg();
    if (TargetRegisterInfo::isPhysicalRegister(Reg) ||
        !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}

void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}

Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
                                           const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}

Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    unsigned VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs) {
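  // With LookThroughInstrs set, walk up through a chain like (a sketch, vreg
  // names hypothetical):
  //   %2:_(s16) = G_TRUNC %1
  //   %1:_(s64) = G_SEXT %0
  //   %0:_(s32) = G_CONSTANT i32 -1
  // The size-changing opcodes seen on the way are recorded in SeenOpcodes and
  // replayed on the constant's APInt once the G_CONSTANT is reached.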
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  while ((MI = MRI.getVRegDef(VReg)) &&
         MI->getOpcode() != TargetOpcode::G_CONSTANT && LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (TargetRegisterInfo::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || MI->getOpcode() != TargetOpcode::G_CONSTANT ||
      (!MI->getOperand(1).isImm() && !MI->getOperand(1).isCImm()))
    return None;

  const MachineOperand &CstVal = MI->getOperand(1);
  unsigned BitWidth = MRI.getType(MI->getOperand(0).getReg()).getSizeInBits();
  APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                             : CstVal.getCImm()->getValue();
  assert(Val.getBitWidth() == BitWidth &&
         "Value bitwidth doesn't match definition type");
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  if (Val.getBitWidth() > 64)
    return None;

  return ValueAndVReg{Val.getSExtValue(), VReg};
}

const llvm::ConstantFP* llvm::getConstantFPVRegVal(unsigned VReg,
                                          const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}

llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return nullptr;
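
  // Only look through COPYs that preserve the type; a COPY from a
  // differently-typed or untyped source (e.g. a physical register) ends the
  // walk and is itself returned.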
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    unsigned SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid() || SrcTy != DstTy)
      break;
    DefMI = MRI.getVRegDef(SrcReg);
  }
  return DefMI;
}

llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                       const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}

APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
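
  // Only Size == 16 remains: build the constant at double precision, then
  // round it to IEEE half. Ignored receives whether the conversion lost
  // information.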
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}

Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
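    // The trailing 'true' treats the int64_t value as signed, so negative
    // constants sign-extend correctly into the operand's bit width.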
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}
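
// Note on the SNaN form of the query below: when SNaN is true the caller asks
// specifically whether the value can be a signaling NaN. Operations that quiet
// their inputs may still produce a quiet NaN, so they answer true in that mode
// while the general (SNaN == false) query keeps returning false.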

bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  if (DefMI->getFlag(MachineInstr::FmNoNans))
    return true;

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}

void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}