//===- llvm/CodeGen/GlobalISel/Utils.cpp -------------------------*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file This file implements the utility functions used by the GlobalISel
/// pipeline.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/GlobalISel/RegisterBankInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineOptimizationRemarkEmitter.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/Constants.h"

#define DEBUG_TYPE "globalisel-utils"

using namespace llvm;
unsigned llvm::constrainRegToClass(MachineRegisterInfo &MRI,
                                   const TargetInstrInfo &TII,
                                   const RegisterBankInfo &RBI, unsigned Reg,
                                   const TargetRegisterClass &RegClass) {
  if (!RBI.constrainGenericRegister(Reg, RegClass, MRI))
    return MRI.createVirtualRegister(&RegClass);

  return Reg;
}
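// Note that when constrainGenericRegister fails, the fresh virtual register
// returned above is not yet connected to Reg; callers such as
// constrainOperandRegClass below are expected to stitch the two together
// with a COPY.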
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt,
    const TargetRegisterClass &RegClass, const MachineOperand &RegMO,
    unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  unsigned ConstrainedReg = constrainRegToClass(MRI, TII, RBI, Reg, RegClass);
  // If we created a new virtual register because the class is not compatible
  // then create a copy between the new and the old register.
  if (ConstrainedReg != Reg) {
    MachineBasicBlock::iterator InsertIt(&InsertPt);
    MachineBasicBlock &MBB = *InsertPt.getParent();
    if (RegMO.isUse()) {
      BuildMI(MBB, InsertIt, InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), ConstrainedReg)
          .addReg(Reg);
    } else {
      assert(RegMO.isDef() && "Must be a definition");
      BuildMI(MBB, std::next(InsertIt), InsertPt.getDebugLoc(),
              TII.get(TargetOpcode::COPY), Reg)
          .addReg(ConstrainedReg);
    }
  }
  return ConstrainedReg;
}
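// Illustrative sketch of the stitching above (the register class is
// hypothetical). Constraining a use operand %0 of INSTR to gpr32 produces
//   %1:gpr32 = COPY %0
//   INSTR %1, ...
// whereas constraining a def operand produces
//   INSTR %1:gpr32, ...
//   %0 = COPY %1
// with the caller responsible for rewriting the operand to %1.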
unsigned llvm::constrainOperandRegClass(
    const MachineFunction &MF, const TargetRegisterInfo &TRI,
    MachineRegisterInfo &MRI, const TargetInstrInfo &TII,
    const RegisterBankInfo &RBI, MachineInstr &InsertPt, const MCInstrDesc &II,
    const MachineOperand &RegMO, unsigned OpIdx) {
  Register Reg = RegMO.getReg();
  // Assume physical registers are properly constrained.
  assert(Register::isVirtualRegister(Reg) && "PhysReg not implemented");

  const TargetRegisterClass *RegClass = TII.getRegClass(II, OpIdx, &TRI, MF);
  // Some of the target independent instructions, like COPY, may not impose any
  // register class constraints on some of their operands: if it's a use, we
  // can skip constraining as the instruction defining the register would
  // constrain it.

  // We can't constrain unallocatable register classes, because we can't create
  // virtual registers for these classes, so we need to let targets handle this
  // case.
  if (RegClass && !RegClass->isAllocatable())
    RegClass = TRI.getConstrainedRegClassForOperand(RegMO, MRI);

  if (!RegClass) {
    assert((!isTargetSpecificOpcode(II.getOpcode()) || RegMO.isUse()) &&
           "Register class constraint is required unless either the "
           "instruction is target independent or the operand is a use");
    // FIXME: Just bailing out like this here might not be enough, unless we
    // expect the users of this function to do the right thing for PHIs and
    // COPY:
    //   v1 = COPY v0
    //   v2 = COPY v1
    // v1 here may end up not being constrained at all. Note that to reproduce
    // the issue we likely need a destination pattern of a selection rule
    // producing such extra copies, not just an input GMIR with them, as every
    // existing target using selectImpl handles copies before calling it and
    // they never reach this function.
    return Reg;
  }
  return constrainOperandRegClass(MF, TRI, MRI, TII, RBI, InsertPt, *RegClass,
                                  RegMO, OpIdx);
}
bool llvm::constrainSelectedInstRegOperands(MachineInstr &I,
                                            const TargetInstrInfo &TII,
                                            const TargetRegisterInfo &TRI,
                                            const RegisterBankInfo &RBI) {
  assert(!isPreISelGenericOpcode(I.getOpcode()) &&
         "A selected instruction is expected");
  MachineBasicBlock &MBB = *I.getParent();
  MachineFunction &MF = *MBB.getParent();
  MachineRegisterInfo &MRI = MF.getRegInfo();

  for (unsigned OpI = 0, OpE = I.getNumExplicitOperands(); OpI != OpE; ++OpI) {
    MachineOperand &MO = I.getOperand(OpI);

    // There's nothing to be done on non-register operands.
    if (!MO.isReg())
      continue;

    LLVM_DEBUG(dbgs() << "Converting operand: " << MO << '\n');
    assert(MO.isReg() && "Unsupported non-reg operand");

    Register Reg = MO.getReg();
    // Physical registers don't need to be constrained.
    if (Register::isPhysicalRegister(Reg))
      continue;

    // Register operands with a value of 0 (e.g. predicate operands) don't need
    // to be constrained.
    if (Reg == 0)
      continue;

    // If the operand is a vreg, we should constrain its regclass, and only
    // insert COPYs if that's impossible.
    // constrainOperandRegClass does that for us.
    MO.setReg(constrainOperandRegClass(MF, TRI, MRI, TII, RBI, I, I.getDesc(),
                                       MO, OpI));

    // Tie uses to defs as indicated in MCInstrDesc if this hasn't already been
    // done.
    if (MO.isUse()) {
      int DefIdx = I.getDesc().getOperandConstraint(OpI, MCOI::TIED_TO);
      if (DefIdx != -1 && !I.isRegTiedToUseOperand(DefIdx))
        I.tieOperands(DefIdx, OpI);
    }
  }
  return true;
}
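// Typical usage sketch (the target opcode is illustrative): a target's
// InstructionSelector mutates a generic instruction in place and then calls
// this helper to constrain all of its register operands, e.g.
//   I.setDesc(TII.get(AArch64::ADDWrr));
//   return constrainSelectedInstRegOperands(I, TII, TRI, RBI);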
bool llvm::isTriviallyDead(const MachineInstr &MI,
                           const MachineRegisterInfo &MRI) {
  // If we can move an instruction, we can remove it. Otherwise, it has
  // a side-effect of some sort.
  bool SawStore = false;
  if (!MI.isSafeToMove(/*AA=*/nullptr, SawStore) && !MI.isPHI())
    return false;

  // Instructions without side-effects are dead iff they only define dead
  // vregs.
  for (auto &MO : MI.operands()) {
    if (!MO.isReg() || !MO.isDef())
      continue;

    Register Reg = MO.getReg();
    if (Register::isPhysicalRegister(Reg) || !MRI.use_nodbg_empty(Reg))
      return false;
  }
  return true;
}
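// Example (illustrative): %2:_(s32) = G_ADD %0, %1 is trivially dead once %2
// has no non-debug uses, since the add is side-effect free; an instruction
// that stores or defines a physical register is never trivially dead.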
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              MachineOptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    MORE.emit(R);
}
void llvm::reportGISelFailure(MachineFunction &MF, const TargetPassConfig &TPC,
                              MachineOptimizationRemarkEmitter &MORE,
                              const char *PassName, StringRef Msg,
                              const MachineInstr &MI) {
  MachineOptimizationRemarkMissed R(PassName, "GISelFailure: ",
                                    MI.getDebugLoc(), MI.getParent());
  R << Msg;
  // Printing MI is expensive; only do it if expensive remarks are enabled.
  if (TPC.isGlobalISelAbortEnabled() || MORE.allowExtraAnalysis(PassName))
    R << ": " << ore::MNV("Inst", MI);
  reportGISelFailure(MF, TPC, MORE, R);
}
Optional<int64_t> llvm::getConstantVRegVal(unsigned VReg,
                                           const MachineRegisterInfo &MRI) {
  Optional<ValueAndVReg> ValAndVReg =
      getConstantVRegValWithLookThrough(VReg, MRI, /*LookThroughInstrs*/ false);
  assert((!ValAndVReg || ValAndVReg->VReg == VReg) &&
         "Value found while looking through instrs");
  if (!ValAndVReg)
    return None;
  return ValAndVReg->Value;
}
Optional<ValueAndVReg> llvm::getConstantVRegValWithLookThrough(
    unsigned VReg, const MachineRegisterInfo &MRI, bool LookThroughInstrs,
    bool HandleFConstant) {
  SmallVector<std::pair<unsigned, unsigned>, 4> SeenOpcodes;
  MachineInstr *MI;
  auto IsConstantOpcode = [HandleFConstant](unsigned Opcode) {
    return Opcode == TargetOpcode::G_CONSTANT ||
           (HandleFConstant && Opcode == TargetOpcode::G_FCONSTANT);
  };
  auto GetImmediateValue = [HandleFConstant,
                            &MRI](const MachineInstr &MI) -> Optional<APInt> {
    const MachineOperand &CstVal = MI.getOperand(1);
    if (!CstVal.isImm() && !CstVal.isCImm() &&
        (!HandleFConstant || !CstVal.isFPImm()))
      return None;
    if (!CstVal.isFPImm()) {
      unsigned BitWidth =
          MRI.getType(MI.getOperand(0).getReg()).getSizeInBits();
      APInt Val = CstVal.isImm() ? APInt(BitWidth, CstVal.getImm())
                                 : CstVal.getCImm()->getValue();
      assert(Val.getBitWidth() == BitWidth &&
             "Value bitwidth doesn't match definition type");
      return Val;
    }
    return CstVal.getFPImm()->getValueAPF().bitcastToAPInt();
  };
  while ((MI = MRI.getVRegDef(VReg)) && !IsConstantOpcode(MI->getOpcode()) &&
         LookThroughInstrs) {
    switch (MI->getOpcode()) {
    case TargetOpcode::G_TRUNC:
    case TargetOpcode::G_SEXT:
    case TargetOpcode::G_ZEXT:
      SeenOpcodes.push_back(std::make_pair(
          MI->getOpcode(),
          MRI.getType(MI->getOperand(0).getReg()).getSizeInBits()));
      VReg = MI->getOperand(1).getReg();
      break;
    case TargetOpcode::COPY:
      VReg = MI->getOperand(1).getReg();
      if (Register::isPhysicalRegister(VReg))
        return None;
      break;
    case TargetOpcode::G_INTTOPTR:
      VReg = MI->getOperand(1).getReg();
      break;
    default:
      return None;
    }
  }
  if (!MI || !IsConstantOpcode(MI->getOpcode()))
    return None;

  Optional<APInt> MaybeVal = GetImmediateValue(*MI);
  if (!MaybeVal)
    return None;
  APInt &Val = *MaybeVal;
  while (!SeenOpcodes.empty()) {
    std::pair<unsigned, unsigned> OpcodeAndSize = SeenOpcodes.pop_back_val();
    switch (OpcodeAndSize.first) {
    case TargetOpcode::G_TRUNC:
      Val = Val.trunc(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_SEXT:
      Val = Val.sext(OpcodeAndSize.second);
      break;
    case TargetOpcode::G_ZEXT:
      Val = Val.zext(OpcodeAndSize.second);
      break;
    }
  }

  if (Val.getBitWidth() > 64)
    return None;

  return ValueAndVReg{Val.getSExtValue(), VReg};
}
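// Look-through example (illustrative): given
//   %0:_(s32) = G_CONSTANT i32 -1
//   %1:_(s64) = G_SEXT %0(s32)
// querying %1 with LookThroughInstrs=true records the G_SEXT's 64-bit
// destination size, reads the 32-bit constant, re-applies the sign extension,
// and returns {-1, %0}.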
const llvm::ConstantFP *
llvm::getConstantFPVRegVal(unsigned VReg, const MachineRegisterInfo &MRI) {
  MachineInstr *MI = MRI.getVRegDef(VReg);
  if (TargetOpcode::G_FCONSTANT != MI->getOpcode())
    return nullptr;
  return MI->getOperand(1).getFPImm();
}
llvm::MachineInstr *llvm::getDefIgnoringCopies(Register Reg,
                                               const MachineRegisterInfo &MRI) {
  auto *DefMI = MRI.getVRegDef(Reg);
  auto DstTy = MRI.getType(DefMI->getOperand(0).getReg());
  if (!DstTy.isValid())
    return nullptr;
  while (DefMI->getOpcode() == TargetOpcode::COPY) {
    Register SrcReg = DefMI->getOperand(1).getReg();
    auto SrcTy = MRI.getType(SrcReg);
    if (!SrcTy.isValid() || SrcTy != DstTy)
      break;
    DefMI = MRI.getVRegDef(SrcReg);
  }
  return DefMI;
}
llvm::MachineInstr *llvm::getOpcodeDef(unsigned Opcode, Register Reg,
                                       const MachineRegisterInfo &MRI) {
  MachineInstr *DefMI = getDefIgnoringCopies(Reg, MRI);
  return DefMI && DefMI->getOpcode() == Opcode ? DefMI : nullptr;
}
APFloat llvm::getAPFloatFromSize(double Val, unsigned Size) {
  if (Size == 32)
    return APFloat(float(Val));
  if (Size == 64)
    return APFloat(Val);
  if (Size != 16)
    llvm_unreachable("Unsupported FPConstant size");
  bool Ignored;
  APFloat APF(Val);
  APF.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Ignored);
  return APF;
}
Optional<APInt> llvm::ConstantFoldBinOp(unsigned Opcode, const unsigned Op1,
                                        const unsigned Op2,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  auto MaybeOp2Cst = getConstantVRegVal(Op2, MRI);
  if (MaybeOp1Cst && MaybeOp2Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    APInt C2(Ty.getSizeInBits(), *MaybeOp2Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_ADD:
      return C1 + C2;
    case TargetOpcode::G_AND:
      return C1 & C2;
    case TargetOpcode::G_ASHR:
      return C1.ashr(C2);
    case TargetOpcode::G_LSHR:
      return C1.lshr(C2);
    case TargetOpcode::G_MUL:
      return C1 * C2;
    case TargetOpcode::G_OR:
      return C1 | C2;
    case TargetOpcode::G_SHL:
      return C1 << C2;
    case TargetOpcode::G_SUB:
      return C1 - C2;
    case TargetOpcode::G_XOR:
      return C1 ^ C2;
    case TargetOpcode::G_UDIV:
      if (!C2.getBoolValue())
        break;
      return C1.udiv(C2);
    case TargetOpcode::G_SDIV:
      if (!C2.getBoolValue())
        break;
      return C1.sdiv(C2);
    case TargetOpcode::G_UREM:
      if (!C2.getBoolValue())
        break;
      return C1.urem(C2);
    case TargetOpcode::G_SREM:
      if (!C2.getBoolValue())
        break;
      return C1.srem(C2);
    }
  }
  return None;
}
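// Example (illustrative): with %0:_(s32) = G_CONSTANT i32 7 and
// %1:_(s32) = G_CONSTANT i32 5, ConstantFoldBinOp(TargetOpcode::G_ADD, %0, %1,
// MRI) yields APInt(32, 12); the division and remainder cases fall through to
// None when the divisor is zero rather than folding an undefined operation.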
bool llvm::isKnownNeverNaN(Register Val, const MachineRegisterInfo &MRI,
                           bool SNaN) {
  const MachineInstr *DefMI = MRI.getVRegDef(Val);
  if (!DefMI)
    return false;

  if (DefMI->getFlag(MachineInstr::FmNoNans))
    return true;

  if (SNaN) {
    // FP operations quiet signaling NaNs. For now, just handle the ones
    // inserted during legalization.
    switch (DefMI->getOpcode()) {
    case TargetOpcode::G_FPEXT:
    case TargetOpcode::G_FPTRUNC:
    case TargetOpcode::G_FCANONICALIZE:
      return true;
    default:
      return false;
    }
  }

  return false;
}
Optional<APInt> llvm::ConstantFoldExtOp(unsigned Opcode, const unsigned Op1,
                                        uint64_t Imm,
                                        const MachineRegisterInfo &MRI) {
  auto MaybeOp1Cst = getConstantVRegVal(Op1, MRI);
  if (MaybeOp1Cst) {
    LLT Ty = MRI.getType(Op1);
    APInt C1(Ty.getSizeInBits(), *MaybeOp1Cst, true);
    switch (Opcode) {
    default:
      break;
    case TargetOpcode::G_SEXT_INREG:
      return C1.trunc(Imm).sext(C1.getBitWidth());
    }
  }
  return None;
}
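// Worked example (illustrative): G_SEXT_INREG with Imm = 8 on the 16-bit
// constant 0x00AB truncates to the 8-bit value 0xAB, then sign-extends back to
// 16 bits, yielding 0xFFAB (-85).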
void llvm::getSelectionDAGFallbackAnalysisUsage(AnalysisUsage &AU) {
  AU.addPreserved<StackProtector>();
}
MVT llvm::getMVTForLLT(LLT Ty) {
  if (!Ty.isVector())
    return MVT::getIntegerVT(Ty.getSizeInBits());

  return MVT::getVectorVT(
      MVT::getIntegerVT(Ty.getElementType().getSizeInBits()),
      Ty.getNumElements());
}
LLT llvm::getLLTForMVT(MVT Ty) {
  if (!Ty.isVector())
    return LLT::scalar(Ty.getSizeInBits());

  return LLT::vector(Ty.getVectorNumElements(),
                     Ty.getVectorElementType().getSizeInBits());
}
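// Round-trip example (illustrative): getMVTForLLT(LLT::vector(4, 32)) yields
// MVT::v4i32 and getLLTForMVT(MVT::v4i32) yields LLT::vector(4, 32). Both
// directions use integer element types only, so floating-point MVTs such as
// MVT::v4f32 map to the same LLT as their integer counterparts.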